/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/BacktrackingAllocator.h"

#include "jsprf.h"

#include "jit/BitSet.h"

using namespace js;
using namespace js::jit;

using mozilla::DebugOnly;

/////////////////////////////////////////////////////////////////////
// Utility
/////////////////////////////////////////////////////////////////////

static inline bool
SortBefore(UsePosition* a, UsePosition* b)
{
    return a->pos <= b->pos;
}

static inline bool
SortBefore(LiveRange::BundleLink* a, LiveRange::BundleLink* b)
{
    LiveRange* rangea = LiveRange::get(a);
    LiveRange* rangeb = LiveRange::get(b);
    MOZ_ASSERT(!rangea->intersects(rangeb));
    return rangea->from() < rangeb->from();
}

static inline bool
SortBefore(LiveRange::RegisterLink* a, LiveRange::RegisterLink* b)
{
    return LiveRange::get(a)->from() <= LiveRange::get(b)->from();
}

template <typename T>
static inline void
InsertSortedList(InlineForwardList<T>& list, T* value)
{
    if (list.empty()) {
        list.pushFront(value);
        return;
    }

    if (SortBefore(list.back(), value)) {
        list.pushBack(value);
        return;
    }

    T* prev = nullptr;
    for (InlineForwardListIterator<T> iter = list.begin(); iter; iter++) {
        if (SortBefore(value, *iter))
            break;
        prev = *iter;
    }

    if (prev)
        list.insertAfter(prev, value);
    else
        list.pushFront(value);
}

/////////////////////////////////////////////////////////////////////
// LiveRange
/////////////////////////////////////////////////////////////////////

void
LiveRange::addUse(UsePosition* use)
{
    MOZ_ASSERT(covers(use->pos));
    InsertSortedList(uses_, use);
}

void
LiveRange::distributeUses(LiveRange* other)
{
    MOZ_ASSERT(other->vreg() == vreg());
    MOZ_ASSERT(this != other);

    // Move over all uses which fit in |other|'s boundaries.
    for (UsePositionIterator iter = usesBegin(); iter; ) {
        UsePosition* use = *iter;
        if (other->covers(use->pos)) {
            uses_.removeAndIncrement(iter);
            other->addUse(use);
        } else {
            iter++;
        }
    }

    // Distribute the definition to |other| as well, if possible.
    if (hasDefinition() && from() == other->from())
        other->setHasDefinition();
}

bool
LiveRange::contains(LiveRange* other) const
{
    return from() <= other->from() && to() >= other->to();
}

void
LiveRange::intersect(LiveRange* other, Range* pre, Range* inside, Range* post) const
{
    MOZ_ASSERT(pre->empty() && inside->empty() && post->empty());

    CodePosition innerFrom = from();
    if (from() < other->from()) {
        if (to() < other->from()) {
            *pre = range_;
            return;
        }
        *pre = Range(from(), other->from());
        innerFrom = other->from();
    }

    CodePosition innerTo = to();
    if (to() > other->to()) {
        if (from() >= other->to()) {
            *post = range_;
            return;
        }
        *post = Range(other->to(), to());
        innerTo = other->to();
    }

    if (innerFrom != innerTo)
        *inside = Range(innerFrom, innerTo);
}

bool
LiveRange::intersects(LiveRange* other) const
{
    Range pre, inside, post;
    intersect(other, &pre, &inside, &post);
    return !inside.empty();
}
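
// Worked example (editorial; positions are hypothetical): intersecting a
// range [4,20) with [10,30) yields pre = [4,10), inside = [10,20) and an
// empty post, so intersects() reports true. When the two ranges are
// disjoint, the entire range is assigned to |pre| or |post| and |inside|
// stays empty.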

/////////////////////////////////////////////////////////////////////
// SpillSet
/////////////////////////////////////////////////////////////////////

void
SpillSet::setAllocation(LAllocation alloc)
{
    for (size_t i = 0; i < numSpilledBundles(); i++)
        spilledBundle(i)->setAllocation(alloc);
}

/////////////////////////////////////////////////////////////////////
// LiveBundle
/////////////////////////////////////////////////////////////////////

#ifdef DEBUG
size_t
LiveBundle::numRanges() const
{
    size_t count = 0;
    for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++)
        count++;
    return count;
}
#endif // DEBUG

LiveRange*
LiveBundle::rangeFor(CodePosition pos) const
{
    for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
        LiveRange* range = LiveRange::get(*iter);
        if (range->covers(pos))
            return range;
    }
    return nullptr;
}

void
LiveBundle::addRange(LiveRange* range)
{
    MOZ_ASSERT(!range->bundle());
    range->setBundle(this);
    InsertSortedList(ranges_, &range->bundleLink);
}

bool
LiveBundle::addRange(TempAllocator& alloc, uint32_t vreg, CodePosition from, CodePosition to)
{
    LiveRange* range = LiveRange::FallibleNew(alloc, vreg, from, to);
    if (!range)
        return false;
    addRange(range);
    return true;
}

bool
LiveBundle::addRangeAndDistributeUses(TempAllocator& alloc, LiveRange* oldRange,
                                      CodePosition from, CodePosition to)
{
    LiveRange* range = LiveRange::FallibleNew(alloc, oldRange->vreg(), from, to);
    if (!range)
        return false;
    addRange(range);
    oldRange->distributeUses(range);
    return true;
}

LiveRange*
LiveBundle::popFirstRange()
{
    LiveRange::BundleLinkIterator iter = rangesBegin();
    if (!iter)
        return nullptr;

    LiveRange* range = LiveRange::get(*iter);
    ranges_.removeAt(iter);

    range->setBundle(nullptr);
    return range;
}

void
LiveBundle::removeRange(LiveRange* range)
{
    for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
        LiveRange* existing = LiveRange::get(*iter);
        if (existing == range) {
            ranges_.removeAt(iter);
            return;
        }
    }
    MOZ_CRASH();
}

/////////////////////////////////////////////////////////////////////
// VirtualRegister
/////////////////////////////////////////////////////////////////////

bool
VirtualRegister::addInitialRange(TempAllocator& alloc, CodePosition from, CodePosition to)
{
    MOZ_ASSERT(from < to);

    // Mark [from,to) as a live range for this register during the initial
    // liveness analysis, coalescing with any existing overlapping ranges.

    LiveRange* prev = nullptr;
    LiveRange* merged = nullptr;
    for (LiveRange::RegisterLinkIterator iter(rangesBegin()); iter; ) {
        LiveRange* existing = LiveRange::get(*iter);

        if (from > existing->to()) {
            // The new range should go after this one.
            prev = existing;
            iter++;
            continue;
        }

        if (to.next() < existing->from()) {
            // The new range should go before this one.
            break;
        }

        if (!merged) {
            // This is the first old range we've found that overlaps the new
            // range. Extend this one to cover its union with the new range.
            merged = existing;

            if (from < existing->from())
                existing->setFrom(from);
            if (to > existing->to())
                existing->setTo(to);

            // Continue searching to see if any other old ranges can be
            // coalesced with the new merged range.
            iter++;
            continue;
        }

        // Coalesce this range into the previous range we merged into.
        MOZ_ASSERT(existing->from() >= merged->from());
        if (existing->to() > merged->to())
            merged->setTo(existing->to());

        MOZ_ASSERT(!existing->hasDefinition());
        existing->distributeUses(merged);
        MOZ_ASSERT(!existing->hasUses());

        ranges_.removeAndIncrement(iter);
    }

    if (!merged) {
        // The new range does not overlap any existing range for the vreg.
        LiveRange* range = LiveRange::FallibleNew(alloc, vreg(), from, to);
        if (!range)
            return false;

        if (prev)
            ranges_.insertAfter(&prev->registerLink, &range->registerLink);
        else
            ranges_.pushFront(&range->registerLink);
    }

    return true;
}

void
VirtualRegister::addInitialUse(UsePosition* use)
{
    LiveRange::get(*rangesBegin())->addUse(use);
}

void
VirtualRegister::setInitialDefinition(CodePosition from)
{
    LiveRange* first = LiveRange::get(*rangesBegin());
    MOZ_ASSERT(from >= first->from());
    first->setFrom(from);
    first->setHasDefinition();
}

LiveRange*
VirtualRegister::rangeFor(CodePosition pos, bool preferRegister /* = false */) const
{
    LiveRange* found = nullptr;
    for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
        LiveRange* range = LiveRange::get(*iter);
        if (range->covers(pos)) {
            if (!preferRegister || range->bundle()->allocation().isRegister())
                return range;
            if (!found)
                found = range;
        }
    }
    return found;
}

void
VirtualRegister::addRange(LiveRange* range)
{
    InsertSortedList(ranges_, &range->registerLink);
}

void
VirtualRegister::removeRange(LiveRange* range)
{
    for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
        LiveRange* existing = LiveRange::get(*iter);
        if (existing == range) {
            ranges_.removeAt(iter);
            return;
        }
    }
    MOZ_CRASH();
}
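
// Worked example (editorial; positions are hypothetical): suppose a vreg
// already has ranges [10,20) and [24,30) and addInitialRange is called with
// [5,25). The first overlapping range is extended to the union [5,25), the
// scan then continues, and [24,30) is coalesced into it as well, leaving the
// single range [5,30). Because the comparison uses to.next(), ranges that
// merely abut (within one position) are merged too.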

/////////////////////////////////////////////////////////////////////
// BacktrackingAllocator
/////////////////////////////////////////////////////////////////////

// This function pre-allocates and initializes as much global state as possible
// to avoid littering the algorithms with memory management cruft.
bool
BacktrackingAllocator::init()
{
    if (!RegisterAllocator::init())
        return false;

    liveIn = mir->allocate<BitSet>(graph.numBlockIds());
    if (!liveIn)
        return false;

    size_t numVregs = graph.numVirtualRegisters();
    if (!vregs.init(mir->alloc(), numVregs))
        return false;
    memset(&vregs[0], 0, sizeof(VirtualRegister) * numVregs);
    for (uint32_t i = 0; i < numVregs; i++)
        new(&vregs[i]) VirtualRegister();

    // Build virtual register objects.
    for (size_t i = 0; i < graph.numBlocks(); i++) {
        if (mir->shouldCancel("Create data structures (main loop)"))
            return false;

        LBlock* block = graph.getBlock(i);
        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            if (mir->shouldCancel("Create data structures (inner loop 1)"))
                return false;

            for (size_t j = 0; j < ins->numDefs(); j++) {
                LDefinition* def = ins->getDef(j);
                if (def->isBogusTemp())
                    continue;
                vreg(def).init(*ins, def, /* isTemp = */ false);
            }

            for (size_t j = 0; j < ins->numTemps(); j++) {
                LDefinition* def = ins->getTemp(j);
                if (def->isBogusTemp())
                    continue;
                vreg(def).init(*ins, def, /* isTemp = */ true);
            }
        }
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi* phi = block->getPhi(j);
            LDefinition* def = phi->getDef(0);
            vreg(def).init(phi, def, /* isTemp = */ false);
        }
    }

    LiveRegisterSet remainingRegisters(allRegisters_.asLiveSet());
    while (!remainingRegisters.emptyGeneral()) {
        AnyRegister reg = AnyRegister(remainingRegisters.takeAnyGeneral());
        registers[reg.code()].allocatable = true;
    }
    while (!remainingRegisters.emptyFloat()) {
        AnyRegister reg = AnyRegister(remainingRegisters.takeAnyFloat<RegTypeName::Any>());
        registers[reg.code()].allocatable = true;
    }

    LifoAlloc* lifoAlloc = mir->alloc().lifoAlloc();
    for (size_t i = 0; i < AnyRegister::Total; i++) {
        registers[i].reg = AnyRegister::FromCode(i);
        registers[i].allocations.setAllocator(lifoAlloc);
    }

    hotcode.setAllocator(lifoAlloc);
    callRanges.setAllocator(lifoAlloc);

    // Partition the graph into hot and cold sections, for helping to make
    // splitting decisions. Since we don't have any profiling data this is a
    // crapshoot, so just mark the bodies of inner loops as hot and everything
    // else as cold.
    LBlock* backedge = nullptr;
    for (size_t i = 0; i < graph.numBlocks(); i++) {
        LBlock* block = graph.getBlock(i);

        // If we see a loop header, mark the backedge so we know when we have
        // hit the end of the loop. Don't process the loop immediately, so that
        // if there is an inner loop we will ignore the outer backedge.
        if (block->mir()->isLoopHeader())
            backedge = block->mir()->backedge()->lir();

        if (block == backedge) {
            LBlock* header = block->mir()->loopHeaderOfBackedge()->lir();
            LiveRange* range = LiveRange::FallibleNew(alloc(), 0, entryOf(header),
                                                      exitOf(block).next());
            if (!range || !hotcode.insert(range))
                return false;
        }
    }

    return true;
}
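
// Editorial note on the partitioning above: |backedge| is overwritten each
// time a loop header is seen and is never restored, so for nested loops only
// the innermost backedge is ever matched and only inner loop bodies land in
// |hotcode|, as the comment above intends.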

bool
BacktrackingAllocator::addInitialFixedRange(AnyRegister reg, CodePosition from, CodePosition to)
{
    LiveRange* range = LiveRange::FallibleNew(alloc(), 0, from, to);
    return range && registers[reg.code()].allocations.insert(range);
}

#ifdef DEBUG
// Returns true iff ins has a def/temp reusing the input allocation.
static bool
IsInputReused(LInstruction* ins, LUse* use)
{
    for (size_t i = 0; i < ins->numDefs(); i++) {
        if (ins->getDef(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(ins->getDef(i)->getReusedInput())->toUse() == use)
        {
            return true;
        }
    }

    for (size_t i = 0; i < ins->numTemps(); i++) {
        if (ins->getTemp(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(ins->getTemp(i)->getReusedInput())->toUse() == use)
        {
            return true;
        }
    }

    return false;
}
#endif

/*
 * This function builds up liveness ranges for all virtual registers
 * defined in the function.
 *
 * The algorithm is based on the one published in:
 *
 * Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on
 * SSA Form." Proceedings of the International Symposium on Code Generation
 * and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF.
 *
 * The algorithm operates on blocks ordered such that dominators of a block
 * are before the block itself, and such that all blocks of a loop are
 * contiguous. It proceeds backwards over the instructions in this order,
 * marking registers live at their uses, ending their live ranges at
 * definitions, and recording which registers are live at the top of every
 * block. To deal with loop backedges, registers live at the beginning of
 * a loop gain a range covering the entire loop.
 */
bool
BacktrackingAllocator::buildLivenessInfo()
{
    JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis");

    Vector<MBasicBlock*, 1, SystemAllocPolicy> loopWorkList;
    BitSet loopDone(graph.numBlockIds());
    if (!loopDone.init(alloc()))
        return false;

    for (size_t i = graph.numBlocks(); i > 0; i--) {
        if (mir->shouldCancel("Build Liveness Info (main loop)"))
            return false;

        LBlock* block = graph.getBlock(i - 1);
        MBasicBlock* mblock = block->mir();

        BitSet& live = liveIn[mblock->id()];
        new (&live) BitSet(graph.numVirtualRegisters());
        if (!live.init(alloc()))
            return false;

        // Propagate liveIn from our successors to us.
        for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
            MBasicBlock* successor = mblock->lastIns()->getSuccessor(i);
            // Skip backedges, as we fix them up at the loop header.
            if (mblock->id() < successor->id())
                live.insertAll(liveIn[successor->id()]);
        }

        // Add successor phis.
        if (mblock->successorWithPhis()) {
            LBlock* phiSuccessor = mblock->successorWithPhis()->lir();
            for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
                LPhi* phi = phiSuccessor->getPhi(j);
                LAllocation* use = phi->getOperand(mblock->positionInPhiSuccessor());
                uint32_t reg = use->toUse()->virtualRegister();
                live.insert(reg);
                vreg(use).setUsedByPhi();
            }
        }

        // Registers are assumed alive for the entire block; a define shortens
        // the range to the point of definition.
        for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
            if (!vregs[*liveRegId].addInitialRange(alloc(), entryOf(block), exitOf(block).next()))
                return false;
        }

        // Shorten the front end of ranges for live variables to their point of
        // definition, if found.
        for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
            // Calls may clobber registers, so force a spill and reload around the callsite.
            if (ins->isCall()) {
                for (AnyRegisterIterator iter(allRegisters_.asLiveSet()); iter.more(); ++iter) {
                    bool found = false;
                    for (size_t i = 0; i < ins->numDefs(); i++) {
                        if (ins->getDef(i)->isFixed() &&
                            ins->getDef(i)->output()->aliases(LAllocation(*iter)))
                        {
                            found = true;
                            break;
                        }
                    }
                    // If this register doesn't have an explicit def above, mark
                    // it as clobbered by the call unless it is actually
                    // call-preserved.
                    if (!found && !ins->isCallPreserved(*iter)) {
                        if (!addInitialFixedRange(*iter, outputOf(*ins), outputOf(*ins).next()))
                            return false;
                    }
                }

                CallRange* callRange = new(alloc().fallible()) CallRange(outputOf(*ins),
                                                                         outputOf(*ins).next());
                if (!callRange)
                    return false;

                callRangesList.pushFront(callRange);
                if (!callRanges.insert(callRange))
                    return false;
            }
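
            // Editorial note: the net effect of the block above is that every
            // allocatable register that is neither fixed as an output of the
            // call nor call-preserved gets a one-position fixed range at
            // outputOf(call). Any value whose bundle must live across the
            // call therefore conflicts with those registers and has to be
            // spilled or split around the callsite.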

            for (size_t i = 0; i < ins->numDefs(); i++) {
                LDefinition* def = ins->getDef(i);
                if (def->isBogusTemp())
                    continue;

                CodePosition from = outputOf(*ins);

                if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
                    // MUST_REUSE_INPUT is implemented by allocating an output
                    // register and moving the input to it. Register hints are
                    // used to avoid unnecessary moves. We give the input an
                    // LUse::ANY policy to avoid allocating a register for the
                    // input.
                    LUse* inputUse = ins->getOperand(def->getReusedInput())->toUse();
                    MOZ_ASSERT(inputUse->policy() == LUse::REGISTER);
                    MOZ_ASSERT(inputUse->usedAtStart());
                    *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
                }

                if (!vreg(def).addInitialRange(alloc(), from, from.next()))
                    return false;
                vreg(def).setInitialDefinition(from);
                live.remove(def->virtualRegister());
            }

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition* temp = ins->getTemp(i);
                if (temp->isBogusTemp())
                    continue;

                // Normally temps are considered to cover both the input
                // and output of the associated instruction. In some cases
                // though we want to use a fixed register as both an input
                // and clobbered register in the instruction, so watch for
                // this and shorten the temp to cover only the output.
                CodePosition from = inputOf(*ins);
                if (temp->policy() == LDefinition::FIXED) {
                    AnyRegister reg = temp->output()->toRegister();
                    for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
                        if (alloc->isUse()) {
                            LUse* use = alloc->toUse();
                            if (use->isFixedRegister()) {
                                if (GetFixedRegister(vreg(use).def(), use) == reg)
                                    from = outputOf(*ins);
                            }
                        }
                    }
                }

                CodePosition to = ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();

                if (!vreg(temp).addInitialRange(alloc(), from, to))
                    return false;
                vreg(temp).setInitialDefinition(from);
            }

            DebugOnly<bool> hasUseRegister = false;
            DebugOnly<bool> hasUseRegisterAtStart = false;

            for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
                if (inputAlloc->isUse()) {
                    LUse* use = inputAlloc->toUse();

                    // Call uses should always be at-start, since calls use all
                    // registers.
                    MOZ_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
                                  use->usedAtStart());

#ifdef DEBUG
                    // Don't allow at-start call uses if there are temps of the same kind,
                    // so that we don't assign the same register. Only allow this when the
                    // use and temp are fixed registers, as they can't alias.
                    if (ins->isCall() && use->usedAtStart()) {
                        for (size_t i = 0; i < ins->numTemps(); i++) {
                            MOZ_ASSERT_IF(!ins->getTemp(i)->isBogusTemp(),
                                          vreg(ins->getTemp(i)).type() != vreg(use).type() ||
                                          (use->isFixedRegister() && ins->getTemp(i)->isFixed()));
                        }
                    }

                    // If there are both useRegisterAtStart(x) and useRegister(y)
                    // uses, we may assign the same register to both operands
                    // (bug 772830). Don't allow this for now.
                    if (use->policy() == LUse::REGISTER) {
                        if (use->usedAtStart()) {
                            if (!IsInputReused(*ins, use))
                                hasUseRegisterAtStart = true;
                        } else {
                            hasUseRegister = true;
                        }
                    }
                    MOZ_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
#endif

                    // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
                    if (use->policy() == LUse::RECOVERED_INPUT)
                        continue;

                    CodePosition to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
                    if (use->isFixedRegister()) {
                        LAllocation reg(AnyRegister::FromCode(use->registerCode()));
                        for (size_t i = 0; i < ins->numDefs(); i++) {
                            LDefinition* def = ins->getDef(i);
                            if (def->policy() == LDefinition::FIXED && *def->output() == reg)
                                to = inputOf(*ins);
                        }
                    }

                    if (!vreg(use).addInitialRange(alloc(), entryOf(block), to.next()))
                        return false;
                    UsePosition* usePosition = new(alloc().fallible()) UsePosition(use, to);
                    if (!usePosition)
                        return false;
                    vreg(use).addInitialUse(usePosition);
                    live.insert(use->virtualRegister());
                }
            }
        }

        // Phis have simultaneous assignment semantics at block begin, so at
        // the beginning of the block we can be sure that liveIn does not
        // contain any phi outputs.
        for (unsigned int i = 0; i < block->numPhis(); i++) {
            LDefinition* def = block->getPhi(i)->getDef(0);
            if (live.contains(def->virtualRegister())) {
                live.remove(def->virtualRegister());
            } else {
                // This is a dead phi, so add a dummy range over all phis. This
                // can go away if we have an earlier dead code elimination pass.
                CodePosition entryPos = entryOf(block);
                if (!vreg(def).addInitialRange(alloc(), entryPos, entryPos.next()))
                    return false;
            }
        }
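
        // Editorial example for the loop fixup below (hypothetical numbers):
        // if v7 is live at the top of the header of a loop made up of blocks
        // 3..6, the worklist walk gives v7 a range covering each of those
        // blocks in full and inserts v7 into each block's liveIn set.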

        if (mblock->isLoopHeader()) {
            // A divergence from the published algorithm is required here, as
            // our block order does not guarantee that blocks of a loop are
            // contiguous. As a result, a single live range spanning the
            // loop is not possible. Additionally, we require liveIn in a later
            // pass for resolution, so that must also be fixed up here.
            MBasicBlock* loopBlock = mblock->backedge();
            while (true) {
                // Blocks must already have been visited to have a liveIn set.
                MOZ_ASSERT(loopBlock->id() >= mblock->id());

                // Add a range for this entire loop block
                CodePosition from = entryOf(loopBlock->lir());
                CodePosition to = exitOf(loopBlock->lir()).next();

                for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
                    if (!vregs[*liveRegId].addInitialRange(alloc(), from, to))
                        return false;
                }

                // Fix up the liveIn set.
                liveIn[loopBlock->id()].insertAll(live);

                // Make sure we don't visit this node again
                loopDone.insert(loopBlock->id());

                // If this is the loop header, any predecessors are either the
                // backedge or out of the loop, so skip any predecessors of
                // this block
                if (loopBlock != mblock) {
                    for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
                        MBasicBlock* pred = loopBlock->getPredecessor(i);
                        if (loopDone.contains(pred->id()))
                            continue;
                        if (!loopWorkList.append(pred))
                            return false;
                    }
                }

                // Terminate loop if out of work.
                if (loopWorkList.empty())
                    break;

                // Grab the next block off the work list, skipping any OSR block.
                MBasicBlock* osrBlock = graph.mir().osrBlock();
                while (!loopWorkList.empty()) {
                    loopBlock = loopWorkList.popCopy();
                    if (loopBlock != osrBlock)
                        break;
                }

                // If end is reached without finding a non-OSR block, then no more work items were found.
                if (loopBlock == osrBlock) {
                    MOZ_ASSERT(loopWorkList.empty());
                    break;
                }
            }

            // Clear the done set for other loops
            loopDone.clear();
        }

        MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty());
    }

    JitSpew(JitSpew_RegAlloc, "Liveness analysis complete");

    if (JitSpewEnabled(JitSpew_RegAlloc))
        dumpInstructions();

    return true;
}

bool
BacktrackingAllocator::go()
{
    JitSpew(JitSpew_RegAlloc, "Beginning register allocation");

    if (!init())
        return false;

    if (!buildLivenessInfo())
        return false;

    if (!allocationQueue.reserve(graph.numVirtualRegisters() * 3 / 2))
        return false;

    JitSpew(JitSpew_RegAlloc, "Beginning grouping and queueing registers");
    if (!mergeAndQueueRegisters())
        return false;

    if (JitSpewEnabled(JitSpew_RegAlloc))
        dumpVregs();

    JitSpew(JitSpew_RegAlloc, "Beginning main allocation loop");

    // Allocate, spill and split bundles until finished.
    while (!allocationQueue.empty()) {
        if (mir->shouldCancel("Backtracking Allocation"))
            return false;

        QueueItem item = allocationQueue.removeHighest();
        if (!processBundle(mir, item.bundle))
            return false;
    }

    JitSpew(JitSpew_RegAlloc, "Main allocation loop complete");

    if (!tryAllocatingRegistersForSpillBundles())
        return false;

    if (!pickStackSlots())
        return false;

    if (JitSpewEnabled(JitSpew_RegAlloc))
        dumpAllocations();

    if (!resolveControlFlow())
        return false;

    if (!reifyAllocations())
        return false;

    if (!populateSafepoints())
        return false;

    if (!annotateMoveGroups())
        return false;

    return true;
}

static bool
IsArgumentSlotDefinition(LDefinition* def)
{
    return def->policy() == LDefinition::FIXED && def->output()->isArgument();
}

static bool
IsThisSlotDefinition(LDefinition* def)
{
    return IsArgumentSlotDefinition(def) &&
           def->output()->toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value);
}

bool
BacktrackingAllocator::tryMergeBundles(LiveBundle* bundle0, LiveBundle* bundle1)
{
    // See if bundle0 and bundle1 can be merged together.
    if (bundle0 == bundle1)
        return true;

    // Get a representative virtual register from each bundle.
    VirtualRegister& reg0 = vregs[bundle0->firstRange()->vreg()];
    VirtualRegister& reg1 = vregs[bundle1->firstRange()->vreg()];

    if (!reg0.isCompatible(reg1))
        return true;

    // Registers which might spill to the frame's |this| slot can only be
    // grouped with other such registers. The frame's |this| slot must always
    // hold the |this| value, as required by JitFrame tracing and by the Ion
    // constructor calling convention.
    if (IsThisSlotDefinition(reg0.def()) || IsThisSlotDefinition(reg1.def())) {
        if (*reg0.def()->output() != *reg1.def()->output())
            return true;
    }

    // Registers which might spill to the frame's argument slots can only be
    // grouped with other such registers if the frame might access those
    // arguments through a lazy arguments object or rest parameter.
    if (IsArgumentSlotDefinition(reg0.def()) || IsArgumentSlotDefinition(reg1.def())) {
        if (graph.mir().entryBlock()->info().mayReadFrameArgsDirectly()) {
            if (*reg0.def()->output() != *reg1.def()->output())
                return true;
        }
    }

    // Limit the number of times we compare ranges if there are many ranges in
    // one of the bundles, to avoid quadratic behavior.
    static const size_t MAX_RANGES = 200;

    // Make sure that ranges in the bundles do not overlap.
    LiveRange::BundleLinkIterator iter0 = bundle0->rangesBegin(), iter1 = bundle1->rangesBegin();
    size_t count = 0;
    while (iter0 && iter1) {
        if (++count >= MAX_RANGES)
            return true;

        LiveRange* range0 = LiveRange::get(*iter0);
        LiveRange* range1 = LiveRange::get(*iter1);

        if (range0->from() >= range1->to())
            iter1++;
        else if (range1->from() >= range0->to())
            iter0++;
        else
            return true;
    }

    // Move all ranges from bundle1 into bundle0.
    while (LiveRange* range = bundle1->popFirstRange())
        bundle0->addRange(range);

    return true;
}
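
// Editorial note: the overlap check in tryMergeBundles relies on each
// bundle's range list being sorted by start position, so a two-pointer sweep
// suffices: a range lying entirely before the other is stepped past, and if
// neither range precedes the other they must overlap, which vetoes the merge.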

static inline LDefinition*
FindReusingDefOrTemp(LNode* ins, LAllocation* alloc)
{
    for (size_t i = 0; i < ins->numDefs(); i++) {
        LDefinition* def = ins->getDef(i);
        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(def->getReusedInput()) == alloc)
            return def;
    }
    for (size_t i = 0; i < ins->numTemps(); i++) {
        LDefinition* def = ins->getTemp(i);
        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(def->getReusedInput()) == alloc)
            return def;
    }
    return nullptr;
}

static inline size_t
NumReusingDefs(LNode* ins)
{
    size_t num = 0;
    for (size_t i = 0; i < ins->numDefs(); i++) {
        LDefinition* def = ins->getDef(i);
        if (def->policy() == LDefinition::MUST_REUSE_INPUT)
            num++;
    }
    return num;
}

bool
BacktrackingAllocator::tryMergeReusedRegister(VirtualRegister& def, VirtualRegister& input)
{
    // def is a vreg which reuses input for its output physical register. Try
    // to merge ranges for def with those of input if possible, as avoiding
    // copies before def's instruction is crucial for generated code quality
    // (MUST_REUSE_INPUT is used for all arithmetic on x86/x64).

    if (def.rangeFor(inputOf(def.ins()))) {
        MOZ_ASSERT(def.isTemp());
        def.setMustCopyInput();
        return true;
    }

    LiveRange* inputRange = input.rangeFor(outputOf(def.ins()));
    if (!inputRange) {
        // The input is not live after the instruction, either in a safepoint
        // for the instruction or in subsequent code. The input and output
        // can thus be in the same group.
        return tryMergeBundles(def.firstBundle(), input.firstBundle());
    }

    // The input is live afterwards, either in future instructions or in a
    // safepoint for the reusing instruction. This is impossible to satisfy
    // without copying the input.
    //
    // It may or may not be better to split the input into two bundles at the
    // point of the definition, which may permit merging. One case where it is
    // definitely better to split is if the input never has any register uses
    // after the instruction. Handle this splitting eagerly.

    LBlock* block = def.ins()->block();

    // The input's lifetime must end within the same block as the definition,
    // otherwise it could live on in phis elsewhere.
    if (inputRange != input.lastRange() || inputRange->to() > exitOf(block)) {
        def.setMustCopyInput();
        return true;
    }

    // If we already split the input for some other register, don't make a
    // third bundle.
    if (inputRange->bundle() != input.firstRange()->bundle()) {
        def.setMustCopyInput();
        return true;
    }

    // If the input will start out in memory then adding a separate bundle for
    // memory uses after the def won't help.
    if (input.def()->isFixed() && !input.def()->output()->isRegister()) {
        def.setMustCopyInput();
        return true;
    }

    // The input cannot have register or reused uses after the definition.
    for (UsePositionIterator iter = inputRange->usesBegin(); iter; iter++) {
        if (iter->pos <= inputOf(def.ins()))
            continue;

        LUse* use = iter->use();
        if (FindReusingDefOrTemp(insData[iter->pos], use)) {
            def.setMustCopyInput();
            return true;
        }
        if (iter->usePolicy() != LUse::ANY && iter->usePolicy() != LUse::KEEPALIVE) {
            def.setMustCopyInput();
            return true;
        }
    }

    LiveRange* preRange = LiveRange::FallibleNew(alloc(), input.vreg(),
                                                 inputRange->from(), outputOf(def.ins()));
    if (!preRange)
        return false;

    // The new range starts at reg's input position, which means it overlaps
    // with the old range at one position. This is what we want, because we
    // need to copy the input before the instruction.
    LiveRange* postRange = LiveRange::FallibleNew(alloc(), input.vreg(),
                                                  inputOf(def.ins()), inputRange->to());
    if (!postRange)
        return false;

    inputRange->distributeUses(preRange);
    inputRange->distributeUses(postRange);
    MOZ_ASSERT(!inputRange->hasUses());

    JitSpew(JitSpew_RegAlloc, "  splitting reused input at %u to try to help grouping",
            inputOf(def.ins()).bits());

    LiveBundle* firstBundle = inputRange->bundle();
    input.removeRange(inputRange);
    input.addRange(preRange);
    input.addRange(postRange);

    firstBundle->removeRange(inputRange);
    firstBundle->addRange(preRange);

    // The new range goes in a separate bundle, where it will be spilled during
    // allocation.
    LiveBundle* secondBundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
    if (!secondBundle)
        return false;
    secondBundle->addRange(postRange);

    return tryMergeBundles(def.firstBundle(), input.firstBundle());
}
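
// Editorial example (hypothetical positions): for a reusing instruction at
// position d whose input is live over [a,e), the split above leaves
// pre = [a, outputOf(d)) in the input's existing bundle and moves
// post = [inputOf(d), e) into a fresh bundle that is expected to be spilled.
// The one-position overlap around d is intentional, as the comment above
// notes: the input has to be copied before the instruction runs.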

bool
BacktrackingAllocator::mergeAndQueueRegisters()
{
    MOZ_ASSERT(!vregs[0u].hasRanges());

    // Create a bundle for each register containing all its ranges.
    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
        VirtualRegister& reg = vregs[i];
        if (!reg.hasRanges())
            continue;

        LiveBundle* bundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
        if (!bundle)
            return false;
        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
            LiveRange* range = LiveRange::get(*iter);
            bundle->addRange(range);
        }
    }

    // If there is an OSR block, merge parameters in that block with the
    // corresponding parameters in the initial block.
    if (MBasicBlock* osr = graph.mir().osrBlock()) {
        size_t original = 1;
        for (LInstructionIterator iter = osr->lir()->begin(); iter != osr->lir()->end(); iter++) {
            if (iter->isParameter()) {
                for (size_t i = 0; i < iter->numDefs(); i++) {
                    DebugOnly<bool> found = false;
                    VirtualRegister& paramVreg = vreg(iter->getDef(i));
                    for (; original < paramVreg.vreg(); original++) {
                        VirtualRegister& originalVreg = vregs[original];
                        if (*originalVreg.def()->output() == *iter->getDef(i)->output()) {
                            MOZ_ASSERT(originalVreg.ins()->isParameter());
                            if (!tryMergeBundles(originalVreg.firstBundle(), paramVreg.firstBundle()))
                                return false;
                            found = true;
                            break;
                        }
                    }
                    MOZ_ASSERT(found);
                }
            }
        }
    }

    // Try to merge registers with their reused inputs.
    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
        VirtualRegister& reg = vregs[i];
        if (!reg.hasRanges())
            continue;

        if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) {
            LUse* use = reg.ins()->getOperand(reg.def()->getReusedInput())->toUse();
            if (!tryMergeReusedRegister(reg, vreg(use)))
                return false;
        }
    }

    // Try to merge phis with their inputs.
    for (size_t i = 0; i < graph.numBlocks(); i++) {
        LBlock* block = graph.getBlock(i);
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi* phi = block->getPhi(j);
            VirtualRegister& outputVreg = vreg(phi->getDef(0));
            for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
                VirtualRegister& inputVreg = vreg(phi->getOperand(k)->toUse());
                if (!tryMergeBundles(inputVreg.firstBundle(), outputVreg.firstBundle()))
                    return false;
            }
        }
    }

    // Add all bundles to the allocation queue, and create spill sets for them.
    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
        VirtualRegister& reg = vregs[i];
        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
            LiveRange* range = LiveRange::get(*iter);
            LiveBundle* bundle = range->bundle();
            if (range == bundle->firstRange()) {
                if (!alloc().ensureBallast())
                    return false;

                SpillSet* spill = SpillSet::New(alloc());
                if (!spill)
                    return false;
                bundle->setSpillSet(spill);

                size_t priority = computePriority(bundle);
                if (!allocationQueue.insert(QueueItem(bundle, priority)))
                    return false;
            }
        }
    }

    return true;
}

static const size_t MAX_ATTEMPTS = 2;

bool
BacktrackingAllocator::tryAllocateFixed(LiveBundle* bundle, Requirement requirement,
                                        bool* success, bool* pfixed, LiveBundleVector& conflicting)
{
    // Spill bundles which are required to be in a certain stack slot.
    if (!requirement.allocation().isRegister()) {
        JitSpew(JitSpew_RegAlloc, "  stack allocation requirement");
        bundle->setAllocation(requirement.allocation());
        *success = true;
        return true;
    }

    AnyRegister reg = requirement.allocation().toRegister();
    return tryAllocateRegister(registers[reg.code()], bundle, success, pfixed, conflicting);
}

bool
BacktrackingAllocator::tryAllocateNonFixed(LiveBundle* bundle,
                                           Requirement requirement, Requirement hint,
                                           bool* success, bool* pfixed,
                                           LiveBundleVector& conflicting)
{
    // If we want, but do not require a bundle to be in a specific register,
    // only look at that register for allocating and evict or spill if it is
    // not available. Picking a separate register may be even worse than
    // spilling, as it will still necessitate moves and will tie up more
    // registers than if we spilled.
    if (hint.kind() == Requirement::FIXED) {
        AnyRegister reg = hint.allocation().toRegister();
        if (!tryAllocateRegister(registers[reg.code()], bundle, success, pfixed, conflicting))
            return false;
        if (*success)
            return true;
    }

    // Spill bundles which have no hint or register requirement.
    if (requirement.kind() == Requirement::NONE && hint.kind() != Requirement::REGISTER) {
        JitSpew(JitSpew_RegAlloc, "  postponed spill (no hint or register requirement)");
        if (!spilledBundles.append(bundle))
            return false;
        *success = true;
        return true;
    }

    if (conflicting.empty() || minimalBundle(bundle)) {
        // Search for any available register which the bundle can be
        // allocated to.
        for (size_t i = 0; i < AnyRegister::Total; i++) {
            if (!tryAllocateRegister(registers[i], bundle, success, pfixed, conflicting))
                return false;
            if (*success)
                return true;
        }
    }

    // Spill bundles which have no register requirement if they didn't get
    // allocated.
    if (requirement.kind() == Requirement::NONE) {
        JitSpew(JitSpew_RegAlloc, "  postponed spill (no register requirement)");
        if (!spilledBundles.append(bundle))
            return false;
        *success = true;
        return true;
    }

    // We failed to allocate this bundle.
    MOZ_ASSERT(!*success);
    return true;
}
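
// Editorial summary of the fallback order in tryAllocateNonFixed: (1) try
// the register named by a FIXED hint, (2) postpone as a spill when nothing
// even prefers a register, (3) scan all registers (skipped once conflicts
// are already known, unless the bundle is minimal), (4) postpone as a spill
// when a register was merely preferred, and otherwise (5) return without
// success so the caller can evict or split using |conflicting|.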

bool
BacktrackingAllocator::processBundle(MIRGenerator* mir, LiveBundle* bundle)
{
    if (JitSpewEnabled(JitSpew_RegAlloc)) {
        JitSpew(JitSpew_RegAlloc, "Allocating %s [priority %" PRIuSIZE "] [weight %" PRIuSIZE "]",
                bundle->toString().get(), computePriority(bundle), computeSpillWeight(bundle));
    }

    // A bundle can be processed by doing any of the following:
    //
    // - Assigning the bundle a register. The bundle cannot overlap any other
    //   bundle allocated for that physical register.
    //
    // - Spilling the bundle, provided it has no register uses.
    //
    // - Splitting the bundle into two or more bundles which cover the original
    //   one. The new bundles are placed back onto the priority queue for later
    //   processing.
    //
    // - Evicting one or more existing allocated bundles, and then doing one
    //   of the above operations. Evicted bundles are placed back on the
    //   priority queue. Any evicted bundles must have a lower spill weight
    //   than the bundle being processed.
    //
    // As long as this structure is followed, termination is guaranteed.
    // In general, we want to minimize the amount of bundle splitting (which
    // generally necessitates spills), so allocate longer lived, lower weight
    // bundles first and evict and split them later if they prevent allocation
    // for higher weight bundles.

    Requirement requirement, hint;
    bool canAllocate = computeRequirement(bundle, &requirement, &hint);

    bool fixed;
    LiveBundleVector conflicting;
    for (size_t attempt = 0;; attempt++) {
        if (mir->shouldCancel("Backtracking Allocation (processBundle loop)"))
            return false;

        if (canAllocate) {
            bool success = false;
            fixed = false;
            conflicting.clear();

            // Ok, let's try allocating for this bundle.
            if (requirement.kind() == Requirement::FIXED) {
                if (!tryAllocateFixed(bundle, requirement, &success, &fixed, conflicting))
                    return false;
            } else {
                if (!tryAllocateNonFixed(bundle, requirement, hint, &success, &fixed, conflicting))
                    return false;
            }

            // If that worked, we're done!
            if (success)
                return true;

            // If that didn't work, but we have one or more non-fixed bundles
            // known to be conflicting, maybe we can evict them and try again.
            if ((attempt < MAX_ATTEMPTS || minimalBundle(bundle)) &&
                !fixed &&
                !conflicting.empty() &&
                maximumSpillWeight(conflicting) < computeSpillWeight(bundle))
            {
                for (size_t i = 0; i < conflicting.length(); i++) {
                    if (!evictBundle(conflicting[i]))
                        return false;
                }
                continue;
            }
        }

        // A minimal bundle cannot be split any further. If we try to split it
        // at this point we will just end up with the same bundle and will
        // enter an infinite loop. Weights and the initial live ranges must
        // be constructed so that any minimal bundle is allocatable.
        MOZ_ASSERT(!minimalBundle(bundle));

        LiveBundle* conflict = conflicting.empty() ? nullptr : conflicting[0];
        return chooseBundleSplit(bundle, canAllocate && fixed, conflict);
    }
}

bool
BacktrackingAllocator::computeRequirement(LiveBundle* bundle,
                                          Requirement *requirement, Requirement *hint)
{
    // Set any requirement or hint on bundle according to its definition and
    // uses. Return false if there are conflicting requirements which will
    // require the bundle to be split.

    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
        LiveRange* range = LiveRange::get(*iter);
        VirtualRegister& reg = vregs[range->vreg()];

        if (range->hasDefinition()) {
            // Deal with any definition constraints/hints.
            LDefinition::Policy policy = reg.def()->policy();
            if (policy == LDefinition::FIXED) {
                // Fixed policies get a FIXED requirement.
                JitSpew(JitSpew_RegAlloc, "  Requirement %s, fixed by definition",
                        reg.def()->output()->toString().get());
                if (!requirement->merge(Requirement(*reg.def()->output())))
                    return false;
            } else if (reg.ins()->isPhi()) {
                // Phis don't have any requirements, but they should prefer their
                // input allocations. This is captured by the group hints above.
            } else {
                // Non-phis get a REGISTER requirement.
                if (!requirement->merge(Requirement(Requirement::REGISTER)))
                    return false;
            }
        }

        // Search uses for requirements.
        for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
            LUse::Policy policy = iter->usePolicy();
            if (policy == LUse::FIXED) {
                AnyRegister required = GetFixedRegister(reg.def(), iter->use());

                JitSpew(JitSpew_RegAlloc, "  Requirement %s, due to use at %u",
                        required.name(), iter->pos.bits());

                // If there are multiple fixed registers which the bundle is
                // required to use, fail. The bundle will need to be split before
                // it can be allocated.
                if (!requirement->merge(Requirement(LAllocation(required))))
                    return false;
            } else if (policy == LUse::REGISTER) {
                if (!requirement->merge(Requirement(Requirement::REGISTER)))
                    return false;
            } else if (policy == LUse::ANY) {
                // ANY differs from KEEPALIVE by actively preferring a register.
                if (!hint->merge(Requirement(Requirement::REGISTER)))
                    return false;
            }
        }
    }

    return true;
}
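
// Editorial note: Requirement::merge is expected to fail on incompatible
// constraints, for example two uses demanding different fixed registers.
// computeRequirement then returns false, processBundle records
// canAllocate = false, and the bundle is sent straight to chooseBundleSplit
// instead of being handed to the allocators above.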

bool
BacktrackingAllocator::tryAllocateRegister(PhysicalRegister& r, LiveBundle* bundle,
                                           bool* success, bool* pfixed, LiveBundleVector& conflicting)
{
    *success = false;

    if (!r.allocatable)
        return true;

    LiveBundleVector aliasedConflicting;

    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
        LiveRange* range = LiveRange::get(*iter);
        VirtualRegister& reg = vregs[range->vreg()];

        if (!reg.isCompatible(r.reg))
            return true;

        for (size_t a = 0; a < r.reg.numAliased(); a++) {
            PhysicalRegister& rAlias = registers[r.reg.aliased(a).code()];
            LiveRange* existing;
            if (!rAlias.allocations.contains(range, &existing))
                continue;
            if (existing->hasVreg()) {
                MOZ_ASSERT(existing->bundle()->allocation().toRegister() == rAlias.reg);
                bool duplicate = false;
                for (size_t i = 0; i < aliasedConflicting.length(); i++) {
                    if (aliasedConflicting[i] == existing->bundle()) {
                        duplicate = true;
                        break;
                    }
                }
                if (!duplicate && !aliasedConflicting.append(existing->bundle()))
                    return false;
            } else {
                JitSpew(JitSpew_RegAlloc, "  %s collides with fixed use %s",
                        rAlias.reg.name(), existing->toString().get());
                *pfixed = true;
                return true;
            }
        }
    }

    if (!aliasedConflicting.empty()) {
        // One or more aliased registers is allocated to another bundle
        // overlapping this one. Keep track of the conflicting set, and in the
        // case of multiple conflicting sets keep track of the set with the
        // lowest maximum spill weight.

        // The #ifdef guards against "unused variable 'existing'" bustage.
#ifdef JS_JITSPEW
        if (JitSpewEnabled(JitSpew_RegAlloc)) {
            if (aliasedConflicting.length() == 1) {
                LiveBundle* existing = aliasedConflicting[0];
                JitSpew(JitSpew_RegAlloc, "  %s collides with %s [weight %" PRIuSIZE "]",
                        r.reg.name(), existing->toString().get(), computeSpillWeight(existing));
            } else {
                JitSpew(JitSpew_RegAlloc, "  %s collides with the following", r.reg.name());
                for (size_t i = 0; i < aliasedConflicting.length(); i++) {
                    LiveBundle* existing = aliasedConflicting[i];
                    JitSpew(JitSpew_RegAlloc, "      %s [weight %" PRIuSIZE "]",
                            existing->toString().get(), computeSpillWeight(existing));
                }
            }
        }
#endif

        if (conflicting.empty()) {
            if (!conflicting.appendAll(aliasedConflicting))
                return false;
        } else {
            if (maximumSpillWeight(aliasedConflicting) < maximumSpillWeight(conflicting)) {
                conflicting.clear();
                if (!conflicting.appendAll(aliasedConflicting))
                    return false;
            }
        }
        return true;
    }

    JitSpew(JitSpew_RegAlloc, "  allocated to %s", r.reg.name());

    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
        LiveRange* range = LiveRange::get(*iter);
        if (!alloc().ensureBallast())
            return false;
        if (!r.allocations.insert(range))
            return false;
    }

    bundle->setAllocation(LAllocation(r.reg));
    *success = true;
    return true;
}

bool
BacktrackingAllocator::evictBundle(LiveBundle* bundle)
{
    if (JitSpewEnabled(JitSpew_RegAlloc)) {
        JitSpew(JitSpew_RegAlloc, "  Evicting %s [priority %" PRIuSIZE "] [weight %" PRIuSIZE "]",
                bundle->toString().get(), computePriority(bundle), computeSpillWeight(bundle));
    }

    AnyRegister reg(bundle->allocation().toRegister());
    PhysicalRegister& physical = registers[reg.code()];
    MOZ_ASSERT(physical.reg == reg && physical.allocatable);

    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
        LiveRange* range = LiveRange::get(*iter);
        physical.allocations.remove(range);
    }

    bundle->setAllocation(LAllocation());

    size_t priority = computePriority(bundle);
    return allocationQueue.insert(QueueItem(bundle, priority));
}

bool
BacktrackingAllocator::splitAndRequeueBundles(LiveBundle* bundle,
                                              const LiveBundleVector& newBundles)
{
    if (JitSpewEnabled(JitSpew_RegAlloc)) {
        JitSpew(JitSpew_RegAlloc, "    splitting bundle %s into:", bundle->toString().get());
        for (size_t i = 0; i < newBundles.length(); i++)
            JitSpew(JitSpew_RegAlloc, "      %s", newBundles[i]->toString().get());
    }

    // Remove all ranges in the old bundle from their register's list.
    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
        LiveRange* range = LiveRange::get(*iter);
        vregs[range->vreg()].removeRange(range);
    }

    // Add all ranges in the new bundles to their register's list.
    for (size_t i = 0; i < newBundles.length(); i++) {
        LiveBundle* newBundle = newBundles[i];
        for (LiveRange::BundleLinkIterator iter = newBundle->rangesBegin(); iter; iter++) {
            LiveRange* range = LiveRange::get(*iter);
            vregs[range->vreg()].addRange(range);
        }
    }

    // Queue the new bundles for register assignment.
    for (size_t i = 0; i < newBundles.length(); i++) {
        LiveBundle* newBundle = newBundles[i];
        size_t priority = computePriority(newBundle);
        if (!allocationQueue.insert(QueueItem(newBundle, priority)))
            return false;
    }

    return true;
}

bool
BacktrackingAllocator::spill(LiveBundle* bundle)
{
    JitSpew(JitSpew_RegAlloc, "  Spilling bundle");
    MOZ_ASSERT(bundle->allocation().isBogus());

    if (LiveBundle* spillParent = bundle->spillParent()) {
        JitSpew(JitSpew_RegAlloc, "    Using existing spill bundle");
        for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
            LiveRange* range = LiveRange::get(*iter);
            LiveRange* parentRange = spillParent->rangeFor(range->from());
            MOZ_ASSERT(parentRange->contains(range));
            MOZ_ASSERT(range->vreg() == parentRange->vreg());
            range->distributeUses(parentRange);
            MOZ_ASSERT(!range->hasUses());
            vregs[range->vreg()].removeRange(range);
        }
        return true;
    }

    return bundle->spillSet()->addSpilledBundle(bundle);
}

bool
BacktrackingAllocator::tryAllocatingRegistersForSpillBundles()
{
    for (auto it = spilledBundles.begin(); it != spilledBundles.end(); it++) {
        LiveBundle* bundle = *it;
        LiveBundleVector conflicting;
        bool fixed = false;
        bool success = false;

        if (mir->shouldCancel("Backtracking Try Allocating Spilled Bundles"))
            return false;

        if (JitSpewEnabled(JitSpew_RegAlloc))
            JitSpew(JitSpew_RegAlloc, "Spill or allocate %s", bundle->toString().get());

        // Search for any available register which the bundle can be
        // allocated to.
        for (size_t i = 0; i < AnyRegister::Total; i++) {
            if (!tryAllocateRegister(registers[i], bundle, &success, &fixed, conflicting))
                return false;
            if (success)
                break;
        }

        // If the bundle still has no register, spill the bundle.
        if (!success && !spill(bundle))
            return false;
    }

    return true;
}

bool
BacktrackingAllocator::pickStackSlots()
{
    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
        VirtualRegister& reg = vregs[i];

        if (mir->shouldCancel("Backtracking Pick Stack Slots"))
            return false;

        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
            LiveRange* range = LiveRange::get(*iter);
            LiveBundle* bundle = range->bundle();

            if (bundle->allocation().isBogus()) {
                if (!pickStackSlot(bundle->spillSet()))
                    return false;
                MOZ_ASSERT(!bundle->allocation().isBogus());
            }
        }
    }

    return true;
}

bool
BacktrackingAllocator::pickStackSlot(SpillSet* spillSet)
{
    // Look through all ranges that have been spilled in this set for a
    // register definition which is fixed to a stack or argument slot. If we
    // find one, use it for all bundles that have been spilled. tryMergeBundles
    // makes sure this reuse is possible when an initial bundle contains ranges
    // from multiple virtual registers.
    for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
        LiveBundle* bundle = spillSet->spilledBundle(i);
        for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
            LiveRange* range = LiveRange::get(*iter);
            if (range->hasDefinition()) {
                LDefinition* def = vregs[range->vreg()].def();
                if (def->policy() == LDefinition::FIXED) {
                    MOZ_ASSERT(!def->output()->isRegister());
                    MOZ_ASSERT(!def->output()->isStackSlot());
                    spillSet->setAllocation(*def->output());
                    return true;
                }
            }
        }
    }

    LDefinition::Type type = vregs[spillSet->spilledBundle(0)->firstRange()->vreg()].type();

    SpillSlotList* slotList;
    switch (StackSlotAllocator::width(type)) {
      case 4:  slotList = &normalSlots; break;
      case 8:  slotList = &doubleSlots; break;
      case 16: slotList = &quadSlots;   break;
      default:
        MOZ_CRASH("Bad width");
    }
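
    // Editorial note: spill slots are kept on separate free lists by width
    // (4, 8 or 16 bytes, per StackSlotAllocator::width above), so the reuse
    // search below only ever compares spill sets whose slots are the same
    // size.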

    // Maximum number of existing spill slots we will look at before giving up
    // and allocating a new slot.
    static const size_t MAX_SEARCH_COUNT = 10;

    size_t searches = 0;
    SpillSlot* stop = nullptr;
    while (!slotList->empty()) {
        SpillSlot* spillSlot = *slotList->begin();
        if (!stop) {
            stop = spillSlot;
        } else if (stop == spillSlot) {
            // We looked through every slot in the list.
            break;
        }

        bool success = true;
        for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
            LiveBundle* bundle = spillSet->spilledBundle(i);
            for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
                LiveRange* range = LiveRange::get(*iter);
                LiveRange* existing;
                if (spillSlot->allocated.contains(range, &existing)) {
                    success = false;
                    break;
                }
            }
            if (!success)
                break;
        }
        if (success) {
            // We can reuse this physical stack slot for the new bundles.
            // Update the allocated ranges for the slot.
            for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
                LiveBundle* bundle = spillSet->spilledBundle(i);
                if (!insertAllRanges(spillSlot->allocated, bundle))
                    return false;
            }
            spillSet->setAllocation(spillSlot->alloc);
            return true;
        }

        // On a miss, move the spill to the end of the list. This will cause us
        // to make fewer attempts to allocate from slots with a large and
        // highly contended range.
        slotList->popFront();
        slotList->pushBack(spillSlot);

        if (++searches == MAX_SEARCH_COUNT)
            break;
    }

    // We need a new physical stack slot.
    uint32_t stackSlot = stackSlotAllocator.allocateSlot(type);

    SpillSlot* spillSlot = new(alloc().fallible()) SpillSlot(stackSlot, alloc().lifoAlloc());
    if (!spillSlot)
        return false;

    for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
        LiveBundle* bundle = spillSet->spilledBundle(i);
        if (!insertAllRanges(spillSlot->allocated, bundle))
            return false;
    }

    spillSet->setAllocation(spillSlot->alloc);

    slotList->pushFront(spillSlot);
    return true;
}

bool
BacktrackingAllocator::insertAllRanges(LiveRangeSet& set, LiveBundle* bundle)
{
    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
        LiveRange* range = LiveRange::get(*iter);
        if (!alloc().ensureBallast())
            return false;
        if (!set.insert(range))
            return false;
    }
    return true;
}

bool
BacktrackingAllocator::deadRange(LiveRange* range)
{
    // Check for direct uses of this range.
    if (range->hasUses() || range->hasDefinition())
        return false;

    CodePosition start = range->from();
    LNode* ins = insData[start];
    if (start == entryOf(ins->block()))
        return false;

    VirtualRegister& reg = vregs[range->vreg()];

    // Check if there are later ranges for this vreg.
    LiveRange::RegisterLinkIterator iter = reg.rangesBegin(range);
    for (iter++; iter; iter++) {
        LiveRange* laterRange = LiveRange::get(*iter);
        if (laterRange->from() > range->from())
            return false;
    }

    // Check if this range ends at a loop backedge.
    LNode* last = insData[range->to().previous()];
    if (last->isGoto() && last->toGoto()->target()->id() < last->block()->mir()->id())
        return false;

    // Check if there are phis which this vreg flows to.
    if (reg.usedByPhi())
        return false;

    return true;
}

bool
BacktrackingAllocator::resolveControlFlow()
{
    // Add moves to handle changing assignments for vregs over their lifetime.
    JitSpew(JitSpew_RegAlloc, "Resolving control flow (vreg loop)");

    // Look for places where a register's assignment changes in the middle of a
    // basic block.
    MOZ_ASSERT(!vregs[0u].hasRanges());
    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
        VirtualRegister& reg = vregs[i];

        if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg outer loop)"))
            return false;

        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; ) {
            LiveRange* range = LiveRange::get(*iter);

            if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg inner loop)"))
                return false;

            // Remove ranges which will never be used.
            if (deadRange(range)) {
                reg.removeRangeAndIncrement(iter);
                continue;
            }

            // The range which defines the register does not have a predecessor
            // to add moves from.
            if (range->hasDefinition()) {
                iter++;
                continue;
            }

            // Ignore ranges that start at block boundaries. We will handle
            // these in the next phase.
            CodePosition start = range->from();
            LNode* ins = insData[start];
            if (start == entryOf(ins->block())) {
                iter++;
                continue;
            }

            // If we already saw a range which covers the start of this range
            // and has the same allocation, we don't need an explicit move at
            // the start of this range.
            bool skip = false;
            for (LiveRange::RegisterLinkIterator prevIter = reg.rangesBegin();
                 prevIter != iter;
                 prevIter++)
            {
                LiveRange* prevRange = LiveRange::get(*prevIter);
                if (prevRange->covers(start) &&
                    prevRange->bundle()->allocation() == range->bundle()->allocation())
                {
                    skip = true;
                    break;
                }
            }
            if (skip) {
                iter++;
                continue;
            }

            if (!alloc().ensureBallast())
                return false;

            LiveRange* predecessorRange = reg.rangeFor(start.previous(), /* preferRegister = */ true);
            if (start.subpos() == CodePosition::INPUT) {
                if (!moveInput(ins->toInstruction(), predecessorRange, range, reg.type()))
                    return false;
            } else {
                if (!moveAfter(ins->toInstruction(), predecessorRange, range, reg.type()))
                    return false;
            }

            iter++;
        }
    }

    JitSpew(JitSpew_RegAlloc, "Resolving control flow (block loop)");

    for (size_t i = 0; i < graph.numBlocks(); i++) {
        if (mir->shouldCancel("Backtracking Resolve Control Flow (block loop)"))
            return false;

        LBlock* successor = graph.getBlock(i);
        MBasicBlock* mSuccessor = successor->mir();
        if (mSuccessor->numPredecessors() < 1)
            continue;

        // Resolve phis to moves.
        for (size_t j = 0; j < successor->numPhis(); j++) {
            LPhi* phi = successor->getPhi(j);
            MOZ_ASSERT(phi->numDefs() == 1);
            LDefinition* def = phi->getDef(0);
            VirtualRegister& reg = vreg(def);
            LiveRange* to = reg.rangeFor(entryOf(successor));
            MOZ_ASSERT(to);

            for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
                LBlock* predecessor = mSuccessor->getPredecessor(k)->lir();
                MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);

                LAllocation* input = phi->getOperand(k);
                LiveRange* from = vreg(input).rangeFor(exitOf(predecessor), /* preferRegister = */ true);
                MOZ_ASSERT(from);

                if (!alloc().ensureBallast())
                    return false;
                if (!moveAtExit(predecessor, from, to, def->type()))
                    return false;
            }
        }
    }
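
    // Editorial note on the loop above: each phi becomes one move per
    // predecessor edge, placed at the predecessor's exit; the asserted
    // numSuccessors() == 1 means a predecessor of a block with phis has no
    // other successor, so the move cannot clobber a value needed on a
    // different outgoing edge.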

    // Add moves to resolve graph edges with different allocations at their
    // source and target.
    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
        VirtualRegister& reg = vregs[i];
        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
            LiveRange* targetRange = LiveRange::get(*iter);

            size_t firstBlockId = insData[targetRange->from()]->block()->mir()->id();
            if (!targetRange->covers(entryOf(graph.getBlock(firstBlockId))))
                firstBlockId++;
            for (size_t id = firstBlockId; id < graph.numBlocks(); id++) {
                LBlock* successor = graph.getBlock(id);
                if (!targetRange->covers(entryOf(successor)))
                    break;

                BitSet& live = liveIn[id];
                if (!live.contains(i))
                    continue;

                for (size_t j = 0; j < successor->mir()->numPredecessors(); j++) {
                    LBlock* predecessor = successor->mir()->getPredecessor(j)->lir();
                    if (targetRange->covers(exitOf(predecessor)))
                        continue;

                    if (!alloc().ensureBallast())
                        return false;
                    LiveRange* from = reg.rangeFor(exitOf(predecessor), true);
                    if (successor->mir()->numPredecessors() > 1) {
                        MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
                        if (!moveAtExit(predecessor, from, targetRange, reg.type()))
                            return false;
                    } else {
                        if (!moveAtEntry(successor, from, targetRange, reg.type()))
                            return false;
                    }
                }
            }
        }
    }

    return true;
}

bool
BacktrackingAllocator::isReusedInput(LUse* use, LNode* ins, bool considerCopy)
{
    if (LDefinition* def = FindReusingDefOrTemp(ins, use))
        return considerCopy || !vregs[def->virtualRegister()].mustCopyInput();
    return false;
}

bool
BacktrackingAllocator::isRegisterUse(UsePosition* use, LNode* ins, bool considerCopy)
{
    switch (use->usePolicy()) {
      case LUse::ANY:
        return isReusedInput(use->use(), ins, considerCopy);

      case LUse::REGISTER:
      case LUse::FIXED:
        return true;

      default:
        return false;
    }
}

bool
BacktrackingAllocator::isRegisterDefinition(LiveRange* range)
{
    if (!range->hasDefinition())
        return false;

    VirtualRegister& reg = vregs[range->vreg()];
    if (reg.ins()->isPhi())
        return false;

    if (reg.def()->policy() == LDefinition::FIXED && !reg.def()->output()->isRegister())
        return false;

    return true;
}

bool
BacktrackingAllocator::reifyAllocations()
{
    JitSpew(JitSpew_RegAlloc, "Reifying Allocations");

    MOZ_ASSERT(!vregs[0u].hasRanges());
    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
        VirtualRegister& reg = vregs[i];

        if (mir->shouldCancel("Backtracking Reify Allocations (main loop)"))
            return false;

        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
            LiveRange* range = LiveRange::get(*iter);

            if (range->hasDefinition()) {
                reg.def()->setOutput(range->bundle()->allocation());
                if (reg.ins()->recoversInput()) {
                    LSnapshot* snapshot = reg.ins()->toInstruction()->snapshot();
                    for (size_t i = 0; i < snapshot->numEntries(); i++) {
                        LAllocation* entry = snapshot->getEntry(i);
                        if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
                            *entry = *reg.def()->output();
                    }
                }
            }

            for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
                LAllocation* alloc = iter->use();
                *alloc = range->bundle()->allocation();

                // For any uses which feed into MUST_REUSE_INPUT definitions,
                // add copies if the use and def have different allocations.
                LNode* ins = insData[iter->pos];
                if (LDefinition* def = FindReusingDefOrTemp(ins, alloc)) {
                    LiveRange* outputRange = vreg(def).rangeFor(outputOf(ins));
                    LAllocation res = outputRange->bundle()->allocation();
                    LAllocation sourceAlloc = range->bundle()->allocation();

                    if (res != *alloc) {
                        if (!this->alloc().ensureBallast())
                            return false;
                        if (NumReusingDefs(ins) <= 1) {
                            LMoveGroup* group = getInputMoveGroup(ins->toInstruction());
                            if (!group->addAfter(sourceAlloc, res, reg.type()))
                                return false;
                        } else {
                            LMoveGroup* group = getFixReuseMoveGroup(ins->toInstruction());
                            if (!group->add(sourceAlloc, res, reg.type()))
                                return false;
                        }
                        *alloc = res;
                    }
                }
            }

            addLiveRegistersForRange(reg, range);
        }
    }

    graph.setLocalSlotCount(stackSlotAllocator.stackHeight());
    return true;
}
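
// Editorial example (hypothetical registers): if a MUST_REUSE_INPUT def was
// allocated to edx while its reused input ended up in ecx, the code above
// adds a move ecx -> edx to the instruction's input move group and rewrites
// the use itself to edx, so the instruction sees its input and its output in
// the same register.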

size_t
BacktrackingAllocator::findFirstNonCallSafepoint(CodePosition from)
{
    size_t i = 0;
    for (; i < graph.numNonCallSafepoints(); i++) {
        const LInstruction* ins = graph.getNonCallSafepoint(i);
        if (from <= inputOf(ins))
            break;
    }
    return i;
}

void
BacktrackingAllocator::addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range)
{
    // Fill in the live register sets for all non-call safepoints.
    LAllocation a = range->bundle()->allocation();
    if (!a.isRegister())
        return;

    // Don't add output registers to the safepoint.
    CodePosition start = range->from();
    if (range->hasDefinition() && !reg.isTemp()) {
#ifdef CHECK_OSIPOINT_REGISTERS
        // We don't add the output register to the safepoint,
        // but it still might get added as one of the inputs.
        // So eagerly add this reg to the safepoint clobbered registers.
        if (reg.ins()->isInstruction()) {
            if (LSafepoint* safepoint = reg.ins()->toInstruction()->safepoint())
                safepoint->addClobberedRegister(a.toRegister());
        }
#endif
        start = start.next();
    }

    size_t i = findFirstNonCallSafepoint(start);
    for (; i < graph.numNonCallSafepoints(); i++) {
        LInstruction* ins = graph.getNonCallSafepoint(i);
        CodePosition pos = inputOf(ins);

        // Safepoints are sorted, so we can shortcut out of this loop
        // if we go out of range.
        if (range->to() <= pos)
            break;

        MOZ_ASSERT(range->covers(pos));

        LSafepoint* safepoint = ins->safepoint();
        safepoint->addLiveRegister(a.toRegister());

#ifdef CHECK_OSIPOINT_REGISTERS
        if (reg.isTemp())
            safepoint->addClobberedRegister(a.toRegister());
#endif
    }
}

static inline bool
IsNunbox(VirtualRegister& reg)
{
#ifdef JS_NUNBOX32
    return reg.type() == LDefinition::TYPE ||
           reg.type() == LDefinition::PAYLOAD;
#else
    return false;
#endif
}

static inline bool
IsSlotsOrElements(VirtualRegister& reg)
{
    return reg.type() == LDefinition::SLOTS;
}

static inline bool
IsTraceable(VirtualRegister& reg)
{
    if (reg.type() == LDefinition::OBJECT)
        return true;
#ifdef JS_PUNBOX64
    if (reg.type() == LDefinition::BOX)
        return true;
#endif
    return false;
}

size_t
BacktrackingAllocator::findFirstSafepoint(CodePosition pos, size_t startFrom)
{
    size_t i = startFrom;
    for (; i < graph.numSafepoints(); i++) {
        LInstruction* ins = graph.getSafepoint(i);
        if (pos <= inputOf(ins))
            break;
    }
    return i;
}

bool
BacktrackingAllocator::populateSafepoints()
{
    JitSpew(JitSpew_RegAlloc, "Populating Safepoints");

    size_t firstSafepoint = 0;

    MOZ_ASSERT(!vregs[0u].def());
    for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
        VirtualRegister& reg = vregs[i];

        if (!reg.def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
            continue;

        firstSafepoint = findFirstSafepoint(inputOf(reg.ins()), firstSafepoint);
        if (firstSafepoint >= graph.numSafepoints())
            break;

        for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
            LiveRange* range = LiveRange::get(*iter);

            for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
                LInstruction* ins = graph.getSafepoint(j);

                if (!range->covers(inputOf(ins))) {
                    if (inputOf(ins) >= range->to())
                        break;
                    continue;
                }

                // Include temps but not instruction outputs. Also make sure
                // MUST_REUSE_INPUT is not used with gcthings or nunboxes, or
                // we would have to add the input reg to this safepoint.
                if (ins == reg.ins() && !reg.isTemp()) {
                    DebugOnly<LDefinition*> def = reg.def();
                    MOZ_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
                                  def->type() == LDefinition::GENERAL ||
                                  def->type() == LDefinition::INT32 ||
                                  def->type() == LDefinition::FLOAT32 ||
                                  def->type() == LDefinition::DOUBLE);
                    continue;
                }

                LSafepoint* safepoint = ins->safepoint();

                LAllocation a = range->bundle()->allocation();
                if (a.isGeneralReg() && ins->isCall())
                    continue;

                switch (reg.type()) {
                  case LDefinition::OBJECT:
                    if (!safepoint->addGcPointer(a))
                        return false;
                    break;
                  case LDefinition::SLOTS:
                    if (!safepoint->addSlotsOrElementsPointer(a))
                        return false;
                    break;
#ifdef JS_NUNBOX32
                  case LDefinition::TYPE:
                    if (!safepoint->addNunboxType(i, a))
                        return false;
                    break;
                  case LDefinition::PAYLOAD:
                    if (!safepoint->addNunboxPayload(i, a))
                        return false;
                    break;
#else
                  case LDefinition::BOX:
                    if (!safepoint->addBoxedValue(a))
                        return false;
                    break;
#endif
                  default:
                    MOZ_CRASH("Bad register type");
                }
            }
        }
    }

    return true;
}
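
// Editorial note: on JS_NUNBOX32 (32-bit) builds a boxed Value occupies two
// vregs, TYPE and PAYLOAD, which is why the switch in populateSafepoints
// records nunbox halves keyed by vreg id, while JS_PUNBOX64 builds record a
// single BOX allocation instead.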
bool
BacktrackingAllocator::annotateMoveGroups()
{
    // Annotate move groups in the LIR graph with any register that is not
    // allocated at that point and can be used as a scratch register. This is
    // only required for x86, as other platforms always have scratch registers
    // available for use.
#ifdef JS_CODEGEN_X86
    LiveRange* range = LiveRange::FallibleNew(alloc(), 0, CodePosition(), CodePosition().next());
    if (!range)
        return false;

    for (size_t i = 0; i < graph.numBlocks(); i++) {
        if (mir->shouldCancel("Backtracking Annotate Move Groups"))
            return false;

        LBlock* block = graph.getBlock(i);
        LInstruction* last = nullptr;

        for (LInstructionIterator iter = block->begin(); iter != block->end(); ++iter) {
            if (iter->isMoveGroup()) {
                CodePosition from = last ? outputOf(last) : entryOf(block);
                range->setTo(from.next());
                range->setFrom(from);

                for (size_t i = 0; i < AnyRegister::Total; i++) {
                    PhysicalRegister& reg = registers[i];
                    if (reg.reg.isFloat() || !reg.allocatable)
                        continue;

                    // This register is unavailable for use if (a) it is in use
                    // by some live range immediately before the move group,
                    // or (b) it is an operand in one of the group's moves. The
                    // latter case handles live ranges which end immediately
                    // before the move group or start immediately after.
                    // For (b) we need to consider move groups immediately
                    // preceding or following this one.

                    if (iter->toMoveGroup()->uses(reg.reg.gpr()))
                        continue;
                    bool found = false;
                    LInstructionIterator niter(iter);
                    for (niter++; niter != block->end(); niter++) {
                        if (niter->isMoveGroup()) {
                            if (niter->toMoveGroup()->uses(reg.reg.gpr())) {
                                found = true;
                                break;
                            }
                        } else {
                            break;
                        }
                    }
                    if (iter != block->begin()) {
                        LInstructionIterator riter(iter);
                        do {
                            riter--;
                            if (riter->isMoveGroup()) {
                                if (riter->toMoveGroup()->uses(reg.reg.gpr())) {
                                    found = true;
                                    break;
                                }
                            } else {
                                break;
                            }
                        } while (riter != block->begin());
                    }

                    LiveRange* existing;
                    if (found || reg.allocations.contains(range, &existing))
                        continue;

                    iter->toMoveGroup()->setScratchRegister(reg.reg.gpr());
                    break;
                }
            } else {
                last = *iter;
            }
        }
    }
#endif

    return true;
}
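// The scratch-register test above combines conditions (a) and (b) per
// candidate GPR: the move group itself, and any directly adjacent move
// groups, must not use the register as an operand, and no live range may
// occupy the register immediately before the group (the one-position probe
// |range| checked against reg.allocations). A sketch of the predicate
// shape, with hypothetical helpers standing in for the niter/riter scans
// and the allocation-tree lookup:
#if 0
static bool
ExampleIsUsableAsScratch()
{
    if (ThisGroupUsesReg())               // (b): the group itself.
        return false;
    if (AdjacentMoveGroupsUseReg())       // (b): neighboring move groups.
        return false;
    if (SomeRangeAllocatedHereUsesReg())  // (a): via allocations.contains.
        return false;
    return true;                          // Safe to setScratchRegister().
}
#endif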
%s:",AnyRegister::FromCode(i).name());boolfirst=true;registers[i].allocations.forEach(PrintLiveRange(first));fprintf(stderr,"\n");}}fprintf(stderr,"\n");#endif // JS_JITSPEW}///////////////////////////////////////////////////////////////////////////////// Heuristic Methods///////////////////////////////////////////////////////////////////////////////size_tBacktrackingAllocator::computePriority(LiveBundle*bundle){// The priority of a bundle is its total length, so that longer lived// bundles will be processed before shorter ones (even if the longer ones// have a low spill weight). See processBundle().size_tlifetimeTotal=0;for(LiveRange::BundleLinkIteratoriter=bundle->rangesBegin();iter;iter++){LiveRange*range=LiveRange::get(*iter);lifetimeTotal+=range->to()-range->from();}returnlifetimeTotal;}boolBacktrackingAllocator::minimalDef(LiveRange*range,LNode*ins){// Whether this is a minimal range capturing a definition at ins.return(range->to()<=minimalDefEnd(ins).next())&&((!ins->isPhi()&&range->from()==inputOf(ins))||range->from()==outputOf(ins));}boolBacktrackingAllocator::minimalUse(LiveRange*range,UsePosition*use){// Whether this is a minimal range capturing |use|.LNode*ins=insData[use->pos];return(range->from()==inputOf(ins))&&(range->to()==(use->use()->usedAtStart()?outputOf(ins):outputOf(ins).next()));}boolBacktrackingAllocator::minimalBundle(LiveBundle*bundle,bool*pfixed){LiveRange::BundleLinkIteratoriter=bundle->rangesBegin();LiveRange*range=LiveRange::get(*iter);if(!range->hasVreg()){*pfixed=true;returntrue;}// If a bundle contains multiple ranges, splitAtAllRegisterUses will split// each range into a separate bundle.if(++iter)returnfalse;if(range->hasDefinition()){VirtualRegister®=vregs[range->vreg()];if(pfixed)*pfixed=reg.def()->policy()==LDefinition::FIXED&®.def()->output()->isRegister();returnminimalDef(range,reg.ins());}boolfixed=false,minimal=false,multiple=false;for(UsePositionIteratoriter=range->usesBegin();iter;iter++){if(iter!=range->usesBegin())multiple=true;switch(iter->usePolicy()){caseLUse::FIXED:if(fixed)returnfalse;fixed=true;if(minimalUse(range,*iter))minimal=true;break;caseLUse::REGISTER:if(minimalUse(range,*iter))minimal=true;break;default:break;}}// If a range contains a fixed use and at least one other use,// splitAtAllRegisterUses will split each use into a different bundle.if(multiple&&fixed)minimal=false;if(pfixed)*pfixed=fixed;returnminimal;}size_tBacktrackingAllocator::computeSpillWeight(LiveBundle*bundle){// Minimal bundles have an extremely high spill weight, to ensure they// can evict any other bundles and be allocated to a register.boolfixed;if(minimalBundle(bundle,&fixed))returnfixed?2000000:1000000;size_tusesTotal=0;fixed=false;for(LiveRange::BundleLinkIteratoriter=bundle->rangesBegin();iter;iter++){LiveRange*range=LiveRange::get(*iter);if(range->hasDefinition()){VirtualRegister®=vregs[range->vreg()];if(reg.def()->policy()==LDefinition::FIXED&®.def()->output()->isRegister()){usesTotal+=2000;fixed=true;}elseif(!reg.ins()->isPhi()){usesTotal+=2000;}}for(UsePositionIteratoriter=range->usesBegin();iter;iter++){switch(iter->usePolicy()){caseLUse::ANY:usesTotal+=1000;break;caseLUse::FIXED:fixed=true;MOZ_FALLTHROUGH;caseLUse::REGISTER:usesTotal+=2000;break;caseLUse::KEEPALIVE:break;default:// Note: RECOVERED_INPUT will not appear in UsePositionIterator.MOZ_CRASH("Bad use");}}}// Bundles with fixed uses are given a higher spill weight, since they must// be allocated to a specific register.if(testbed&&fixed)usesTotal*=2;// Compute spill weight as a use density, 
bool
BacktrackingAllocator::minimalDef(LiveRange* range, LNode* ins)
{
    // Whether this is a minimal range capturing a definition at ins.
    return (range->to() <= minimalDefEnd(ins).next()) &&
           ((!ins->isPhi() && range->from() == inputOf(ins)) || range->from() == outputOf(ins));
}

bool
BacktrackingAllocator::minimalUse(LiveRange* range, UsePosition* use)
{
    // Whether this is a minimal range capturing |use|.
    LNode* ins = insData[use->pos];
    return (range->from() == inputOf(ins)) &&
           (range->to() == (use->use()->usedAtStart() ? outputOf(ins) : outputOf(ins).next()));
}

bool
BacktrackingAllocator::minimalBundle(LiveBundle* bundle, bool* pfixed)
{
    LiveRange::BundleLinkIterator iter = bundle->rangesBegin();
    LiveRange* range = LiveRange::get(*iter);

    if (!range->hasVreg()) {
        *pfixed = true;
        return true;
    }

    // If a bundle contains multiple ranges, splitAtAllRegisterUses will split
    // each range into a separate bundle.
    if (++iter)
        return false;

    if (range->hasDefinition()) {
        VirtualRegister& reg = vregs[range->vreg()];
        if (pfixed)
            *pfixed = reg.def()->policy() == LDefinition::FIXED && reg.def()->output()->isRegister();
        return minimalDef(range, reg.ins());
    }

    bool fixed = false, minimal = false, multiple = false;

    for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
        if (iter != range->usesBegin())
            multiple = true;

        switch (iter->usePolicy()) {
          case LUse::FIXED:
            if (fixed)
                return false;
            fixed = true;
            if (minimalUse(range, *iter))
                minimal = true;
            break;

          case LUse::REGISTER:
            if (minimalUse(range, *iter))
                minimal = true;
            break;

          default:
            break;
        }
    }

    // If a range contains a fixed use and at least one other use,
    // splitAtAllRegisterUses will split each use into a different bundle.
    if (multiple && fixed)
        minimal = false;

    if (pfixed)
        *pfixed = fixed;
    return minimal;
}

size_t
BacktrackingAllocator::computeSpillWeight(LiveBundle* bundle)
{
    // Minimal bundles have an extremely high spill weight, to ensure they
    // can evict any other bundles and be allocated to a register.
    bool fixed;
    if (minimalBundle(bundle, &fixed))
        return fixed ? 2000000 : 1000000;

    size_t usesTotal = 0;
    fixed = false;

    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
        LiveRange* range = LiveRange::get(*iter);

        if (range->hasDefinition()) {
            VirtualRegister& reg = vregs[range->vreg()];
            if (reg.def()->policy() == LDefinition::FIXED && reg.def()->output()->isRegister()) {
                usesTotal += 2000;
                fixed = true;
            } else if (!reg.ins()->isPhi()) {
                usesTotal += 2000;
            }
        }

        for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
            switch (iter->usePolicy()) {
              case LUse::ANY:
                usesTotal += 1000;
                break;

              case LUse::FIXED:
                fixed = true;
                MOZ_FALLTHROUGH;
              case LUse::REGISTER:
                usesTotal += 2000;
                break;

              case LUse::KEEPALIVE:
                break;

              default:
                // Note: RECOVERED_INPUT will not appear in UsePositionIterator.
                MOZ_CRASH("Bad use");
            }
        }
    }

    // Bundles with fixed uses are given a higher spill weight, since they must
    // be allocated to a specific register.
    if (testbed && fixed)
        usesTotal *= 2;

    // Compute spill weight as a use density, lowering the weight for long
    // lived bundles with relatively few uses.
    size_t lifetimeTotal = computePriority(bundle);
    return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
}
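// A worked example (hypothetical numbers): a non-minimal bundle with a
// non-phi definition (2000), one REGISTER use (2000) and one ANY use (1000)
// over a total lifetime of 50 code-position units gets spill weight
// (2000 + 2000 + 1000) / 50 == 100. Minimal bundles bypass this division
// entirely with weights of 1000000 (2000000 if fixed), so they are never
// evicted in favor of ordinary bundles.
#if 0
size_t usesTotal = 2000 + 2000 + 1000;
size_t weight = usesTotal / 50;  // == 100.
#endif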
size_t
BacktrackingAllocator::maximumSpillWeight(const LiveBundleVector& bundles)
{
    size_t maxWeight = 0;
    for (size_t i = 0; i < bundles.length(); i++)
        maxWeight = Max(maxWeight, computeSpillWeight(bundles[i]));
    return maxWeight;
}

bool
BacktrackingAllocator::trySplitAcrossHotcode(LiveBundle* bundle, bool* success)
{
    // If this bundle has portions that are hot and portions that are cold,
    // split it at the boundaries between hot and cold code.

    LiveRange* hotRange = nullptr;
    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
        LiveRange* range = LiveRange::get(*iter);
        if (hotcode.contains(range, &hotRange))
            break;
    }

    // Don't split if there is no hot code in the bundle.
    if (!hotRange) {
        JitSpew(JitSpew_RegAlloc, "  bundle does not contain hot code");
        return true;
    }

    // Don't split if there is no cold code in the bundle.
    bool coldCode = false;
    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
        LiveRange* range = LiveRange::get(*iter);
        if (!hotRange->contains(range)) {
            coldCode = true;
            break;
        }
    }
    if (!coldCode) {
        JitSpew(JitSpew_RegAlloc, "  bundle does not contain cold code");
        return true;
    }

    JitSpew(JitSpew_RegAlloc, "  split across hot range %s", hotRange->toString().get());

    // Tweak the splitting method when compiling wasm code to look at actual
    // uses within the hot/cold code. This heuristic is in place as the below
    // mechanism regresses several asm.js tests. Hopefully this will be fixed
    // soon and this special case removed. See bug 948838.
    if (compilingWasm()) {
        SplitPositionVector splitPositions;
        if (!splitPositions.append(hotRange->from()) || !splitPositions.append(hotRange->to()))
            return false;
        *success = true;
        return splitAt(bundle, splitPositions);
    }

    LiveBundle* hotBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
                                                    bundle->spillParent());
    if (!hotBundle)
        return false;
    LiveBundle* preBundle = nullptr;
    LiveBundle* postBundle = nullptr;
    LiveBundle* coldBundle = nullptr;

    if (testbed) {
        coldBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), bundle->spillParent());
        if (!coldBundle)
            return false;
    }

    // Accumulate the ranges of hot and cold code in the bundle. Note that
    // we are only comparing with the single hot range found, so the cold code
    // may contain separate hot ranges.
    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
        LiveRange* range = LiveRange::get(*iter);
        LiveRange::Range hot, coldPre, coldPost;
        range->intersect(hotRange, &coldPre, &hot, &coldPost);

        if (!hot.empty()) {
            if (!hotBundle->addRangeAndDistributeUses(alloc(), range, hot.from, hot.to))
                return false;
        }

        if (!coldPre.empty()) {
            if (testbed) {
                if (!coldBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from, coldPre.to))
                    return false;
            } else {
                if (!preBundle) {
                    preBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
                                                        bundle->spillParent());
                    if (!preBundle)
                        return false;
                }
                if (!preBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from, coldPre.to))
                    return false;
            }
        }

        if (!coldPost.empty()) {
            if (testbed) {
                if (!coldBundle->addRangeAndDistributeUses(alloc(), range, coldPost.from, coldPost.to))
                    return false;
            } else {
                if (!postBundle) {
                    postBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
                                                         bundle->spillParent());
                    if (!postBundle)
                        return false;
                }
                if (!postBundle->addRangeAndDistributeUses(alloc(), range, coldPost.from, coldPost.to))
                    return false;
            }
        }
    }

    MOZ_ASSERT(hotBundle->numRanges() != 0);

    LiveBundleVector newBundles;
    if (!newBundles.append(hotBundle))
        return false;

    if (testbed) {
        MOZ_ASSERT(coldBundle->numRanges() != 0);
        if (!newBundles.append(coldBundle))
            return false;
    } else {
        MOZ_ASSERT(preBundle || postBundle);
        if (preBundle && !newBundles.append(preBundle))
            return false;
        if (postBundle && !newBundles.append(postBundle))
            return false;
    }

    *success = true;
    return splitAndRequeueBundles(bundle, newBundles);
}

bool
BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBundle* conflict,
                                                    bool* success)
{
    // If this bundle's later uses do not require it to be in a register,
    // split it after the last use which does require a register. If conflict
    // is specified, only consider register uses before the conflict starts.

    CodePosition lastRegisterFrom, lastRegisterTo, lastUse;

    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
        LiveRange* range = LiveRange::get(*iter);

        // If the range defines a register, consider that a register use for
        // our purposes here.
        if (isRegisterDefinition(range)) {
            CodePosition spillStart = minimalDefEnd(insData[range->from()]).next();
            if (!conflict || spillStart < conflict->firstRange()->from()) {
                lastUse = lastRegisterFrom = range->from();
                lastRegisterTo = spillStart;
            }
        }

        for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
            LNode* ins = insData[iter->pos];

            // Uses in the bundle should be sorted.
            MOZ_ASSERT(iter->pos >= lastUse);
            lastUse = inputOf(ins);

            if (!conflict || outputOf(ins) < conflict->firstRange()->from()) {
                if (isRegisterUse(*iter, ins, /* considerCopy = */ true)) {
                    lastRegisterFrom = inputOf(ins);
                    lastRegisterTo = iter->pos.next();
                }
            }
        }
    }

    // Can't trim non-register uses off the end by splitting.
    if (!lastRegisterFrom.bits()) {
        JitSpew(JitSpew_RegAlloc, "  bundle has no register uses");
        return true;
    }

    if (lastUse < lastRegisterTo) {
        JitSpew(JitSpew_RegAlloc, "  bundle's last use is a register use");
        return true;
    }

    JitSpew(JitSpew_RegAlloc, "  split after last register use at %u", lastRegisterTo.bits());

    SplitPositionVector splitPositions;
    if (!splitPositions.append(lastRegisterTo))
        return false;
    *success = true;
    return splitAt(bundle, splitPositions);
}
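// A worked example (hypothetical positions): a bundle with a register
// definition at 10 whose only later use, at 40, is KEEPALIVE. The last
// register "use" is the definition itself, so lastRegisterTo points just
// past the definition's spill start, lastUse (40) is beyond it, and the
// bundle is split there; the tail covering position 40 can then live in
// the spill location instead of holding a register for the whole lifetime.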
bool
BacktrackingAllocator::trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBundle* conflict,
                                                      bool* success)
{
    // If this bundle's earlier uses do not require it to be in a register,
    // split it before the first use which does require a register. If
    // conflict is specified, only consider register uses after the conflict
    // ends.

    if (isRegisterDefinition(bundle->firstRange())) {
        JitSpew(JitSpew_RegAlloc, "  bundle is defined by a register");
        return true;
    }
    if (!bundle->firstRange()->hasDefinition()) {
        JitSpew(JitSpew_RegAlloc, "  bundle does not have definition");
        return true;
    }

    CodePosition firstRegisterFrom;

    CodePosition conflictEnd;
    if (conflict) {
        for (LiveRange::BundleLinkIterator iter = conflict->rangesBegin(); iter; iter++) {
            LiveRange* range = LiveRange::get(*iter);
            if (range->to() > conflictEnd)
                conflictEnd = range->to();
        }
    }

    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
        LiveRange* range = LiveRange::get(*iter);

        if (!conflict || range->from() > conflictEnd) {
            if (range->hasDefinition() && isRegisterDefinition(range)) {
                firstRegisterFrom = range->from();
                break;
            }
        }

        for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
            LNode* ins = insData[iter->pos];

            if (!conflict || outputOf(ins) >= conflictEnd) {
                if (isRegisterUse(*iter, ins, /* considerCopy = */ true)) {
                    firstRegisterFrom = inputOf(ins);
                    break;
                }
            }
        }
        if (firstRegisterFrom.bits())
            break;
    }

    if (!firstRegisterFrom.bits()) {
        // Can't trim non-register uses off the beginning by splitting.
        JitSpew(JitSpew_RegAlloc, "  bundle has no register uses");
        return true;
    }

    JitSpew(JitSpew_RegAlloc, "  split before first register use at %u",
            firstRegisterFrom.bits());

    SplitPositionVector splitPositions;
    if (!splitPositions.append(firstRegisterFrom))
        return false;
    *success = true;
    return splitAt(bundle, splitPositions);
}

// When splitting a bundle according to a list of split positions, return
// whether a use or range at |pos| should use a different bundle than the last
// position this was called for.
static bool
UseNewBundle(const SplitPositionVector& splitPositions, CodePosition pos,
             size_t* activeSplitPosition)
{
    if (splitPositions.empty()) {
        // When the split positions are empty we are splitting at all uses.
        return true;
    }

    if (*activeSplitPosition == splitPositions.length()) {
        // We've advanced past all split positions.
        return false;
    }

    if (splitPositions[*activeSplitPosition] > pos) {
        // We haven't gotten to the next split position yet.
        return false;
    }

    // We've advanced past the next split position, find the next one which we
    // should split at.
    while (*activeSplitPosition < splitPositions.length() &&
           splitPositions[*activeSplitPosition] <= pos)
    {
        (*activeSplitPosition)++;
    }
    return true;
}

static bool
HasPrecedingRangeSharingVreg(LiveBundle* bundle, LiveRange* range)
{
    MOZ_ASSERT(range->bundle() == bundle);

    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
        LiveRange* prevRange = LiveRange::get(*iter);
        if (prevRange == range)
            return false;
        if (prevRange->vreg() == range->vreg())
            return true;
    }

    MOZ_CRASH();
}

static bool
HasFollowingRangeSharingVreg(LiveBundle* bundle, LiveRange* range)
{
    MOZ_ASSERT(range->bundle() == bundle);

    bool foundRange = false;
    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
        LiveRange* prevRange = LiveRange::get(*iter);
        if (foundRange && prevRange->vreg() == range->vreg())
            return true;
        if (prevRange == range)
            foundRange = true;
    }

    MOZ_ASSERT(foundRange);
    return false;
}
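// A walk-through of UseNewBundle (hypothetical positions): with
// splitPositions = [20, 50] and |activeSplitPosition| threaded through
// successive calls at increasing positions:
//
//   pos = 10 -> false  (still before split position 20)
//   pos = 25 -> true   (crossed 20; active position advances to 50)
//   pos = 30 -> false  (between 20 and 50, same bundle as pos 25)
//   pos = 60 -> true   (crossed 50; advanced past all positions)
//   pos = 70 -> false  (no split positions remain)
//
// With an empty splitPositions vector every query returns true, which is
// how splitAt (below) produces minimal, per-use bundles.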
bool
BacktrackingAllocator::splitAt(LiveBundle* bundle, const SplitPositionVector& splitPositions)
{
    // Split the bundle at the given split points. Register uses which have no
    // intervening split points are consolidated into the same bundle. If the
    // list of split points is empty, then all register uses are placed in
    // minimal bundles.

    // splitPositions should be sorted.
    for (size_t i = 1; i < splitPositions.length(); ++i)
        MOZ_ASSERT(splitPositions[i - 1] < splitPositions[i]);

    // We don't need to create a new spill bundle if there already is one.
    bool spillBundleIsNew = false;
    LiveBundle* spillBundle = bundle->spillParent();
    if (!spillBundle) {
        spillBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), nullptr);
        if (!spillBundle)
            return false;
        spillBundleIsNew = true;

        for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
            LiveRange* range = LiveRange::get(*iter);

            CodePosition from = range->from();
            if (isRegisterDefinition(range))
                from = minimalDefEnd(insData[from]).next();

            if (from < range->to()) {
                if (!spillBundle->addRange(alloc(), range->vreg(), from, range->to()))
                    return false;

                if (range->hasDefinition() && !isRegisterDefinition(range))
                    spillBundle->lastRange()->setHasDefinition();
            }
        }
    }

    LiveBundleVector newBundles;

    // The bundle which ranges are currently being added to.
    LiveBundle* activeBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
    if (!activeBundle || !newBundles.append(activeBundle))
        return false;

    // State for use by UseNewBundle.
    size_t activeSplitPosition = 0;

    // Make new bundles according to the split positions, and distribute ranges
    // and uses to them.
    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
        LiveRange* range = LiveRange::get(*iter);

        if (UseNewBundle(splitPositions, range->from(), &activeSplitPosition)) {
            activeBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
            if (!activeBundle || !newBundles.append(activeBundle))
                return false;
        }

        LiveRange* activeRange = LiveRange::FallibleNew(alloc(), range->vreg(),
                                                        range->from(), range->to());
        if (!activeRange)
            return false;
        activeBundle->addRange(activeRange);

        if (isRegisterDefinition(range))
            activeRange->setHasDefinition();

        while (range->hasUses()) {
            UsePosition* use = range->popUse();
            LNode* ins = insData[use->pos];

            // Any uses of a register that appear before its definition has
            // finished must be associated with the range for that definition.
            if (isRegisterDefinition(range) && use->pos <= minimalDefEnd(insData[range->from()])) {
                activeRange->addUse(use);
            } else if (isRegisterUse(use, ins)) {
                // Place this register use into a different bundle from the
                // last one if there are any split points between the two uses.
                // UseNewBundle always returns true if we are splitting at all
                // register uses, but we can still reuse the last range and
                // bundle if they have uses at the same position, except when
                // either use is fixed (the two uses might require incompatible
                // registers.)
                if (UseNewBundle(splitPositions, use->pos, &activeSplitPosition) &&
                    (!activeRange->hasUses() ||
                     activeRange->usesBegin()->pos != use->pos ||
                     activeRange->usesBegin()->usePolicy() == LUse::FIXED ||
                     use->usePolicy() == LUse::FIXED))
                {
                    activeBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
                                                           spillBundle);
                    if (!activeBundle || !newBundles.append(activeBundle))
                        return false;
                    activeRange = LiveRange::FallibleNew(alloc(), range->vreg(),
                                                         range->from(), range->to());
                    if (!activeRange)
                        return false;
                    activeBundle->addRange(activeRange);
                }

                activeRange->addUse(use);
            } else {
                MOZ_ASSERT(spillBundleIsNew);
                spillBundle->rangeFor(use->pos)->addUse(use);
            }
        }
    }

    LiveBundleVector filteredBundles;

    // Trim the ends of ranges in each new bundle when there are no other
    // earlier or later ranges in the same bundle with the same vreg.
    for (size_t i = 0; i < newBundles.length(); i++) {
        LiveBundle* bundle = newBundles[i];

        for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; ) {
            LiveRange* range = LiveRange::get(*iter);

            if (!range->hasDefinition()) {
                if (!HasPrecedingRangeSharingVreg(bundle, range)) {
                    if (range->hasUses()) {
                        UsePosition* use = *range->usesBegin();
                        range->setFrom(inputOf(insData[use->pos]));
                    } else {
                        bundle->removeRangeAndIncrementIterator(iter);
                        continue;
                    }
                }
            }

            if (!HasFollowingRangeSharingVreg(bundle, range)) {
                if (range->hasUses()) {
                    UsePosition* use = range->lastUse();
                    range->setTo(use->pos.next());
                } else if (range->hasDefinition()) {
                    range->setTo(minimalDefEnd(insData[range->from()]).next());
                } else {
                    bundle->removeRangeAndIncrementIterator(iter);
                    continue;
                }
            }

            iter++;
        }

        if (bundle->hasRanges() && !filteredBundles.append(bundle))
            return false;
    }

    if (spillBundleIsNew && !filteredBundles.append(spillBundle))
        return false;

    return splitAndRequeueBundles(bundle, filteredBundles);
}

bool
BacktrackingAllocator::splitAcrossCalls(LiveBundle* bundle)
{
    // Split the bundle to separate register uses and non-register uses and
    // allow the vreg to be spilled across its range.

    // Find the locations of all calls in the bundle's range.
    SplitPositionVector callPositions;
    for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
        LiveRange* range = LiveRange::get(*iter);

        CallRange searchRange(range->from(), range->to());
        CallRange* callRange;
        if (!callRanges.contains(&searchRange, &callRange)) {
            // There are no calls inside this range.
            continue;
        }
        MOZ_ASSERT(range->covers(callRange->range.from));

        // The search above returns an arbitrary call within the range. Walk
        // backwards to find the first call in the range.
        for (CallRangeList::reverse_iterator riter = callRangesList.rbegin(callRange);
             riter != callRangesList.rend();
             ++riter)
        {
            CodePosition pos = riter->range.from;
            if (range->covers(pos))
                callRange = *riter;
            else
                break;
        }

        // Add all call positions within the range, by walking forwards.
        for (CallRangeList::iterator iter = callRangesList.begin(callRange);
             iter != callRangesList.end();
             ++iter)
        {
            CodePosition pos = iter->range.from;
            if (!range->covers(pos))
                break;

            // Calls at the beginning of the range are ignored; there is no
            // splitting to do.
            if (range->covers(pos.previous())) {
                MOZ_ASSERT_IF(callPositions.length(), pos > callPositions.back());
                if (!callPositions.append(pos))
                    return false;
            }
        }
    }
    MOZ_ASSERT(callPositions.length());

#ifdef JS_JITSPEW
    JitSpewStart(JitSpew_RegAlloc, "  split across calls at ");
    for (size_t i = 0; i < callPositions.length(); ++i)
        JitSpewCont(JitSpew_RegAlloc, "%s%u", i != 0 ? ", " : "", callPositions[i].bits());
    JitSpewFin(JitSpew_RegAlloc);
#endif

    return splitAt(bundle, callPositions);
}

bool
BacktrackingAllocator::chooseBundleSplit(LiveBundle* bundle, bool fixed, LiveBundle* conflict)
{
    bool success = false;

    if (!trySplitAcrossHotcode(bundle, &success))
        return false;
    if (success)
        return true;

    if (fixed)
        return splitAcrossCalls(bundle);

    if (!trySplitBeforeFirstRegisterUse(bundle, conflict, &success))
        return false;
    if (success)
        return true;

    if (!trySplitAfterLastRegisterUse(bundle, conflict, &success))
        return false;
    if (success)
        return true;

    // Split at all register uses.
    SplitPositionVector emptyPositions;
    return splitAt(bundle, emptyPositions);
}
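// Summary of the splitting cascade in chooseBundleSplit above, from most to
// least targeted strategy:
//
//   1. trySplitAcrossHotcode: separate hot code from cold code.
//   2. splitAcrossCalls: only for bundles conflicting with fixed registers,
//      so the value can be spilled across its calls.
//   3. trySplitBeforeFirstRegisterUse / trySplitAfterLastRegisterUse: trim
//      non-register uses off the front or back of the bundle.
//   4. splitAt with an empty position list: minimal, per-use bundles.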